home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Libris Britannia 4
/
science library(b).zip
/
science library(b)
/
DDJMAG
/
DDJ9206.ZIP
/
386BSD.692
next >
Wrap
Text File
|
1992-05-18
|
31KB
|
876 lines
_PORTING UNIX TO THE 386: THE MISSING PIECES, PART II_
by William Jolitz and Lynne Greer Jolitz
[LISTING ONE]
/* Copyright (c) 1989, 1990, 1991, 1992 William F. Jolitz, TeleMuse
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This software is a component of "386BSD" developed by
* William F. Jolitz, TeleMuse.
* 4. Neither the name of the developer nor the name "386BSD" may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
* THIS SOFTWARE IS A COMPONENT OF 386BSD DEVELOPED BY WILLIAM F. JOLITZ
* AND IS INTENDED FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY. THIS
* SOFTWARE SHOULD NOT BE CONSIDERED TO BE A COMMERCIAL PRODUCT.
* THE DEVELOPER URGES THAT USERS WHO REQUIRE A COMMERCIAL PRODUCT
* NOT MAKE USE OF THIS WORK. THIS SOFTWARE IS PROVIDED BY THE DEVELOPER
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE DEVELOPER BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This procedure implements a minimal program execution facility for
* 386BSD. It interfaces to the BSD kernel as the execve system call.
* Significant limitations and lack of compatibility with POSIX are
* present with this version, to make its basic operation more clear.
*/
#include "param.h"
#include "systm.h"
#include "proc.h"
#include "mount.h"
#include "namei.h"
#include "vnode.h"
#include "file.h"
#include "exec.h"
#include "stat.h"
#include "wait.h"
#include "signalvar.h"
#include "mman.h"
#include "malloc.h"
#include "vm/vm.h"
#include "vm/vm_param.h"
#include "vm/vm_map.h"
#include "vm/vm_kern.h"
#include "machine/reg.h"
extern int dostacklimits;
/* execve() system call. */
/* ARGSUSED */
/* execve() system call: overlay the calling process with a new program
 * image loaded from the file named by uap->fname. Returns 0 on success
 * (the new image runs on syscall return) or an errno value on failure
 * before the point of commitment; a failure after the old image has been
 * destroyed terminates the process with SIGABRT (see exec_abort:). */
execve(p, uap, retval)
struct proc *p;
register struct args {
char *fname; /* pathname of file to execute */
char **argp; /* user-space argument vector */
char **envp; /* user-space environment vector */
} *uap;
int *retval;
{
register struct nameidata *ndp;
struct nameidata nd;
struct exec hdr; /* a.out header read from the file */
char **argbuf, **argbufp, *stringbuf, *stringbufp;
char **vectp, *ep;
int needsenv, limitonargs, stringlen, addr, size, len,
rv, amt, argc, tsize, dsize, bsize, cnt, foff;
struct vattr attr;
struct vmspace *vs;
caddr_t newframe;
/* Step 1. Lookup filename to see if we have something to execute. */
ndp = &nd;
ndp->ni_nameiop = LOOKUP | LOCKLEAF | FOLLOW;
ndp->ni_segflg = UIO_USERSPACE;
ndp->ni_dirp = uap->fname;
/* is it there? (on success namei returns the vnode locked: LOCKLEAF) */
if (rv = namei(ndp, p))
return (rv);
/* is it a regular file? */
if (ndp->ni_vp->v_type != VREG) {
vput(ndp->ni_vp);
return(ENOEXEC);
}
/* is it executable? */
rv = VOP_ACCESS(ndp->ni_vp, VEXEC, p->p_ucred, p);
if (rv)
goto exec_fail;
/* does it have any attributes? (va_size is used by the sanity checks) */
rv = VOP_GETATTR(ndp->ni_vp, &attr, p->p_ucred, p);
if (rv)
goto exec_fail;
/* Step 2. Does file contain a format we can understand and execute */
rv = vn_rdwr(UIO_READ, ndp->ni_vp, (caddr_t)&hdr, sizeof(hdr),
0, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &amt, p);
/* big enough to hold a header? */
if (rv)
goto exec_fail;
/* ... that we recognize? (demand-paged a.out only) */
rv = ENOEXEC;
if (hdr.a_magic != ZMAGIC)
goto exec_fail;
/* sanity check "ain't not such thing as a sanity clause" -groucho */
if (hdr.a_text > MAXTSIZ
|| hdr.a_text % NBPG || hdr.a_text > attr.va_size)
goto exec_fail;
if (hdr.a_data == 0 || hdr.a_data > DFLDSIZ
|| hdr.a_data > attr.va_size
|| hdr.a_data + hdr.a_text > attr.va_size)
goto exec_fail;
if (hdr.a_bss > MAXDSIZ)
goto exec_fail;
if (hdr.a_text + hdr.a_data + hdr.a_bss > MAXTSIZ + MAXDSIZ)
goto exec_fail;
/* Step 3. File and header are valid. Now, dig out the strings
 * out of the old process image. */
/* We implement a single-pass algorithm that builds a new stack
 * frame within the address space of the "old" process image,
 * avoiding the second pass entirely. Thus, the new frame is
 * in position to be run. This consumes much virtual address space,
 * and two pages more of 'real' memory, such are the costs.
 * [Also, note the cache wipe that's avoided!] */
/* create anonymous memory region for new stack; alternate between the
 * two possible stack regions so the new frame never overlays the old */
vs = p->p_vmspace;
if ((unsigned)vs->vm_maxsaddr + MAXSSIZ < USRSTACK)
newframe = (caddr_t) USRSTACK - MAXSSIZ;
else
vs->vm_maxsaddr = newframe = (caddr_t) USRSTACK - 2*MAXSSIZ;
/* don't do stack limit checking on traps temporarily XXX*/
dostacklimits = 0;
rv = vm_allocate(&vs->vm_map, &newframe, MAXSSIZ, FALSE);
if (rv) goto exec_fail;
/* allocate string buffer and arg buffer near the top of the new frame */
argbuf = (char **) (newframe + MAXSSIZ - 3*ARG_MAX);
stringbuf = stringbufp = ((char *)argbuf) + 2*ARG_MAX;
argbufp = argbuf;
/* first, do args */
vectp = uap->argp;
needsenv = 1;
limitonargs = ARG_MAX;
cnt = 0;
do_env_as_well:
if(vectp == 0) goto dont_bother;
/* for each vector entry, copy in its string (args first, then env) */
do {
/* did we outgrow initial argbuf, if so, die */
if (argbufp == (char **)stringbuf) {
rv = E2BIG;
goto exec_dealloc;
}
/* get an string pointer from the old image's vector */
ep = (char *)fuword(vectp++);
if (ep == (char *)-1) {
rv = EFAULT;
goto exec_dealloc;
}
/* if not a null pointer, copy string into the new frame.
 * NOTE(review): presumably copyinoutstr() returns the copied
 * length (incl. NUL) through its last argument -- confirm. */
if (ep) {
if (rv = copyinoutstr(ep, stringbufp,
(u_int)limitonargs, (u_int *) &stringlen)) {
if (rv == ENAMETOOLONG)
rv = E2BIG;
goto exec_dealloc;
}
suword(argbufp++, (int)stringbufp);
cnt++;
stringbufp += stringlen;
limitonargs -= stringlen;
} else {
suword(argbufp++, 0);
break;
}
} while (limitonargs > 0);
dont_bother:
if (limitonargs <= 0) {
rv = E2BIG;
goto exec_dealloc;
}
/* have we done the environment yet ? */
if (needsenv) {
/* remember the arg count for later */
argc = cnt;
vectp = uap->envp;
needsenv = 0;
goto do_env_as_well;
}
/* At this point, one could optionally implement a second pass to
 * condense strings, argument vectors, and stack to fit fewest pages.
 * One might selectively do this when copying was cheaper
 * than leaving allocated two more pages per process. */
/* stuff arg count on top of "new" stack */
argbuf[-1] = (char *)argc;
/* Step 4. Build the new process's image. At this point, we are
 * committed -- destroy old executable! */
/* blow away all address space, except the stack */
rv = vm_deallocate(&vs->vm_map, 0, USRSTACK - 2*MAXSSIZ, FALSE);
if (rv)
goto exec_abort;
/* destroy "old" stack (whichever region the new frame is NOT in) */
if ((unsigned)newframe < USRSTACK - MAXSSIZ) {
rv = vm_deallocate(&vs->vm_map, USRSTACK - MAXSSIZ, MAXSSIZ,
FALSE);
if (rv)
goto exec_abort;
} else {
rv = vm_deallocate(&vs->vm_map, USRSTACK - 2*MAXSSIZ, MAXSSIZ,
FALSE);
if (rv)
goto exec_abort;
}
/* build a new address space */
addr = 0;
/* screwball mode -- special case of 413 to save space for floppy */
if (hdr.a_text == 0) {
foff = tsize = 0;
hdr.a_data += hdr.a_text;
} else {
tsize = roundup(hdr.a_text, NBPG);
foff = NBPG;
}
/* treat text and data in terms of integral page size */
dsize = roundup(hdr.a_data, NBPG);
bsize = roundup(hdr.a_bss + dsize, NBPG);
bsize -= dsize;
/* map text & data in file, as being "paged in" on demand */
rv = vm_mmap(&vs->vm_map, &addr, tsize+dsize, VM_PROT_ALL,
MAP_FILE|MAP_COPY|MAP_FIXED, (caddr_t)ndp->ni_vp, foff);
if (rv)
goto exec_abort;
/* mark pages r/w data, r/o text */
if (tsize) {
addr = 0;
rv = vm_protect(&vs->vm_map, addr, tsize, FALSE,
VM_PROT_READ|VM_PROT_EXECUTE);
if (rv)
goto exec_abort;
}
/* create anonymous memory region for bss */
addr = dsize + tsize;
rv = vm_allocate(&vs->vm_map, &addr, bsize, FALSE);
if (rv)
goto exec_abort;
/* Step 5. Prepare process for execution. */
/* touchup process information -- vm system is unfinished! */
vs->vm_tsize = tsize/NBPG; /* text size (pages) XXX */
vs->vm_dsize = (dsize+bsize)/NBPG; /* data size (pages) XXX */
vs->vm_taddr = 0; /* user virtual address of text XXX */
vs->vm_daddr = (caddr_t)tsize; /* user virtual address of data XXX */
vs->vm_maxsaddr = newframe; /* user VA at max stack growth XXX */
vs->vm_ssize = ((unsigned)vs->vm_maxsaddr + MAXSSIZ
- (unsigned)argbuf)/ NBPG + 1; /* stack size (pages) */
dostacklimits = 1; /* allow stack limits to be enforced XXX */
/* close files on exec, fixup signals */
fdcloseexec(p);
execsigs(p);
/* setup initial register state: SP points at the argc slot */
p->p_regs[SP] = (unsigned) (argbuf - 1);
setregs(p, hdr.a_entry);
vput(ndp->ni_vp);
return (0);
exec_dealloc:
/* remove interim "new" stack frame we were building */
vm_deallocate(&vs->vm_map, newframe, MAXSSIZ, FALSE);
exec_fail:
dostacklimits = 1;
vput(ndp->ni_vp);
return(rv);
exec_abort:
/* sorry, no more process anymore. exit gracefully */
vm_deallocate(&vs->vm_map, newframe, MAXSSIZ, FALSE);
vput(ndp->ni_vp);
exit(p, W_EXITCODE(0, SIGABRT));
/* NOTREACHED */
return(0);
}
[LISTING TWO]
/* Copyright (c) 1992 William Jolitz. All rights reserved.
* Written by William Jolitz 1/92
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This software is a component of "386BSD" developed by
*	William F. Jolitz, TeleMuse.
* 4. Neither the name of the developer nor the name "386BSD"
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS A COMPONENT OF 386BSD DEVELOPED BY WILLIAM F. JOLITZ
* AND IS INTENDED FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY. THIS SOFTWARE
* SHOULD NOT BE CONSIDERED TO BE A COMMERCIAL PRODUCT. THE DEVELOPER URGES
* THAT USERS WHO REQUIRE A COMMERCIAL PRODUCT NOT MAKE USE OF THIS WORK. THIS
* SOFTWARE IS PROVIDED BY THE DEVELOPER ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE DEVELOPER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Block I/O Cache mechanism, ala malloc(). */
#include "param.h"
#include "proc.h"
#include "vnode.h"
#include "buf.h"
#include "specdev.h"
#include "mount.h"
#include "malloc.h"
#include "resourcevar.h"
/* Initialize buffer headers and related structures. */
/* Initialize buffer headers and related structures: empty out the hash
 * table and free queues, then place every buffer header, contentless,
 * on the EMPTY queue. Called once at boot before any buffer I/O. */
void bufinit()
{
	register struct bufhd *hp;
	register struct buf *bp;

	/* hash table: every bucket begins as an empty circular chain */
	for (hp = bufhash; hp < bufhash + BUFHSZ; hp++) {
		hp->b_flags = 0;
		hp->b_forw = hp->b_back = (struct buf *)hp;
	}

	/* free lists: every queue head begins as an empty circular chain */
	for (bp = bfreelist; bp < bfreelist + BQUEUES; bp++) {
		bp->b_flags = 0;
		bp->av_forw = bp->av_back = bp;
		bp->b_forw = bp->b_back = bp;
	}

	/* each buffer header starts life as an empty, invalid header
	 * parked on the EMPTY queue, awaiting storage from getnewbuf() */
	for (bp = buf; bp < buf + nbuf; bp++) {
		bp->b_flags = B_HEAD | B_INVAL;
		bp->b_dev = NODEV;
		bp->b_vp = 0;
		binstailfree(bp, bfreelist + BQ_EMPTY);
		binshash(bp, bfreelist + BQ_EMPTY);
	}
}
/* Find the block in the buffer pool. If buffer is not present, allocate a new
* buffer and load its contents according to the filesystem fill routine. */
bread(vp, blkno, size, cred, bpp)
struct vnode *vp;
daddr_t blkno;
int size;
struct ucred *cred;
struct buf **bpp;
{
struct buf *bp;
int rv = 0;
bp = getblk (vp, blkno, size);
/* if not found in cache, do some I/O */
if ((bp->b_flags & B_CACHE) == 0 || (bp->b_flags & B_INVAL) != 0) {
bp->b_flags |= B_READ;
bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
VOP_STRATEGY(bp);
rv = biowait (bp);
}
*bpp = bp;
return (rv);
}
/* Operates like bread, but also starts I/O on the specified read-ahead block.
* [See page 55 of Bach's Book] */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
struct vnode *vp;
daddr_t blkno; int size;
daddr_t rablkno; int rabsize;
struct ucred *cred;
struct buf **bpp;
{
struct buf *bp, *rabp;
int rv = 0, needwait = 0;
bp = getblk (vp, blkno, size);
/* if not found in cache, do some I/O */
if ((bp->b_flags & B_CACHE) == 0 || (bp->b_flags & B_INVAL) != 0) {
bp->b_flags |= B_READ;
bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
VOP_STRATEGY(bp);
needwait++;
}
rabp = getblk (vp, rablkno, rabsize);
/* if not found in cache, do some I/O (overlapped with first) */
if ((rabp->b_flags & B_CACHE) == 0 || (rabp->b_flags & B_INVAL) != 0) {
rabp->b_flags |= B_READ | B_ASYNC;
rabp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
VOP_STRATEGY(rabp);
} else
brelse(rabp);
/* wait for original I/O */
if (needwait)
rv = biowait (bp);
*bpp = bp;
return (rv);
}
/* Synchronous write. Release buffer on completion. */
bwrite(bp)
register struct buf *bp;
{
int rv;
if(bp->b_flags & B_INVAL) {
brelse(bp);
return (0);
} else {
int wasdelayed;
wasdelayed = bp->b_flags & B_DELWRI;
bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_ASYNC|B_DELWRI);
if(wasdelayed) reassignbuf(bp, bp->b_vp);
bp->b_flags |= B_DIRTY;
VOP_STRATEGY(bp);
rv = biowait(bp);
if (!rv)
bp->b_flags &= ~B_DIRTY;
brelse(bp);
return (rv);
}
}
/* Delayed write. The buffer is marked dirty, but is not queued for I/O. This
* routine should be used when the buffer is expected to be modified again
* soon, typically a small write that partially fills a buffer. NB: magnetic
* tapes can't be delayed; must be written in order writes are requested. */
/* Delayed write. The buffer is marked dirty, but is not queued for I/O. This
 * routine should be used when the buffer is expected to be modified again
 * soon, typically a small write that partially fills a buffer. NB: magnetic
 * tapes can't be delayed; must be written in order writes are requested. */
void bdwrite(bp)
register struct buf *bp;
{
	/* Invalid contents: just release, nothing to write.
	 * FIX: the original fell through after brelse() and went on to
	 * mark and re-release the already-released buffer (compare the
	 * if/else in bawrite()); return immediately instead. */
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	/* tape writes cannot be deferred or reordered; write through */
	if (bp->b_flags & B_TAPE) {
		bwrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_DONE);
	bp->b_flags |= B_DIRTY|B_DELWRI;
	reassignbuf(bp, bp->b_vp);	/* onto the vnode's dirty list */
	brelse(bp);
	return;
}
/* Asynchronous write. Start I/O on a buffer, but do not wait for it to
* complete. The buffer is released when the I/O completes. */
/* Asynchronous write. Start I/O on a buffer, but do not wait for it to
 * complete. The buffer is released when the I/O completes (biodone()
 * releases B_ASYNC buffers). An invalid buffer is released untouched. */
bawrite(bp)
register struct buf *bp;
{
	if ((bp->b_flags & B_BUSY) == 0)
		panic("bawrite: not busy");

	if (bp->b_flags & B_INVAL)
		brelse(bp);
	else {
		int wasdelayed = bp->b_flags & B_DELWRI;

		bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
		if (wasdelayed)
			reassignbuf(bp, bp->b_vp);	/* leaving delayed-write state */
		bp->b_flags |= B_DIRTY | B_ASYNC;
		VOP_STRATEGY(bp);	/* fire and forget */
	}
}
/* Release a buffer. Even if the buffer is dirty, no I/O is started. */
/* Release a buffer. Even if the buffer is dirty, no I/O is started.
 * Wakes sleepers waiting for any free buffer or for this specific one,
 * then enqueues the buffer on the free list appropriate to the state
 * of its contents and clears B_BUSY. Runs at splbio. */
brelse(bp)
register struct buf *bp;
{
int x;
/* anyone need a "free" block? (getnewbuf() sleepers flag BQ_AGE) */
x=splbio();
if ((bfreelist + BQ_AGE)->b_flags & B_WANTED) {
(bfreelist + BQ_AGE) ->b_flags &= ~B_WANTED;
wakeup(bfreelist);
}
/* anyone need this very block? (getblk() sleepers flag the buffer) */
if (bp->b_flags & B_WANTED) {
bp->b_flags &= ~B_WANTED;
wakeup(bp);
}
/* bad contents: invalidate and cut the vnode association */
if (bp->b_flags & (B_INVAL|B_ERROR)) {
bp->b_flags |= B_INVAL;
bp->b_flags &= ~(B_DELWRI|B_CACHE);
if(bp->b_vp)
brelvp(bp);
}
/* enqueue -- NB: binsheadfree/binstailfree are statement macros
 * deliberately used here without trailing semicolons so the
 * if/else chain parses; do not "fix" the punctuation */
/* buffers with junk contents */
if(bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE))
binsheadfree(bp, bfreelist + BQ_AGE)
/* buffers with stale but valid contents */
else if(bp->b_flags & B_AGE)
binstailfree(bp, bfreelist + BQ_AGE)
/* buffers with valid and quite potentially reuseable contents */
else
binstailfree(bp, bfreelist + BQ_LRU)
/* unlock */
bp->b_flags &= ~B_BUSY;
splx(x);
return;
}
int freebufspace = 20*NBPG;	/* bytes still available to allocate for buffer contents */
int allocbufspace;		/* bytes currently allocated to buffer contents */
/* Find a buffer which is available for use. If free memory for buffer space
* and an empty header from the empty list, use that. Otherwise, select
* something from a free list. Preference is to AGE list, then LRU list. */
/* Find a buffer which is available for use. If free memory for buffer space
 * and an empty header from the empty list, use that. Otherwise, select
 * something from a free list. Preference is to AGE list, then LRU list.
 * Returns a busy, scrubbed buffer of sz bytes, or 0 after sleeping for
 * space -- in which case the caller is expected to retry.
 * NOTE(review): `sz' has no declaration and so defaults to int (K&R). */
struct buf *
getnewbuf(sz)
{
struct buf *bp;
int x;
x = splbio();
start:
/* can we constitute a new buffer? */
if (freebufspace > sz
&& bfreelist[BQ_EMPTY].av_forw != (struct buf *)bfreelist+BQ_EMPTY) {
caddr_t addr;
if ((addr = malloc (sz, M_TEMP, M_NOWAIT)) == 0) goto tryfree;
freebufspace -= sz;
allocbufspace += sz;
bp = bfreelist[BQ_EMPTY].av_forw;
bp->b_flags = B_BUSY | B_INVAL;
bremfree(bp);
bp->b_un.b_addr = (caddr_t) addr;
goto fillin;
}
tryfree:
/* steal the least useful reusable buffer: AGE queue first, then LRU */
if (bfreelist[BQ_AGE].av_forw != (struct buf *)bfreelist+BQ_AGE) {
bp = bfreelist[BQ_AGE].av_forw;
bremfree(bp);
} else if (bfreelist[BQ_LRU].av_forw != (struct buf *)bfreelist+BQ_LRU) {
bp = bfreelist[BQ_LRU].av_forw;
bremfree(bp);
} else {
/* wait for a free buffer of any kind (brelse() wakes us) */
(bfreelist + BQ_AGE)->b_flags |= B_WANTED;
sleep(bfreelist, PRIBIO);
splx(x);
return (0); /* caller retries */
}
/* if we are a delayed write, convert to an async write! */
if (bp->b_flags & B_DELWRI) {
bp->b_flags |= B_BUSY;
bawrite (bp);
goto start;
}
if(bp->b_vp)
brelvp(bp); /* cut the old vnode association */
/* we are not free, nor do we contain interesting data */
bp->b_flags = B_BUSY;
fillin:
/* scrub the header for its new identity */
bremhash(bp);
splx(x);
bp->b_dev = NODEV;
bp->b_vp = NULL;
bp->b_blkno = bp->b_lblkno = 0;
bp->b_iodone = 0;
bp->b_error = 0;
bp->b_wcred = bp->b_rcred = NOCRED;
/* resize backing storage if this header last held a different size */
if (bp->b_bufsize != sz) allocbuf(bp, sz);
bp->b_bcount = bp->b_bufsize = sz;
bp->b_dirtyoff = bp->b_dirtyend = 0;
return (bp);
}
/* Check to see if a block is currently memory resident. */
/* Check to see if a block is currently memory resident. Returns the
 * buffer if a valid cached copy of (vp, blkno) exists, else 0. Does
 * not lock or busy the buffer; that is the caller's job (getblk). */
struct buf *incore(vp, blkno)
struct vnode *vp;
daddr_t blkno;
{
	struct buf *chain;
	register struct buf *scan;

	chain = BUFHASH(vp, blkno);
	/* walk the (circular) hash chain for this vnode/block pair */
	for (scan = chain->b_forw; scan != (struct buf *)chain;
	    scan = scan->b_forw) {
		if (scan->b_vp == vp && scan->b_lblkno == blkno &&
		    (scan->b_flags & B_INVAL) == 0)
			return (scan);	/* hit */
	}
	return (0);
}
/* Get a block of requested size that is associated with a given vnode and
* block offset. If it is found in block cache, mark it as found, make it busy
* and return it. Otherwise, return empty block of the correct size. It is up
* to caller to insure that the cached blocks be of the correct size. */
/* Get a block of requested size that is associated with a given vnode and
 * block offset. If it is found in block cache, mark it as found, make it busy
 * and return it. Otherwise, return empty block of the correct size. It is up
 * to caller to insure that the cached blocks be of the correct size. */
struct buf *
getblk(vp, blkno, size)
register struct vnode *vp;
daddr_t blkno;
int size;
{
struct buf *bp, *bh;
int x;
/* loop: every sleep means the world may have changed, so re-search */
for (;;) {
if (bp = incore(vp, blkno)) {
x = splbio();
/* someone else owns it: wait for release, then start over */
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
sleep (bp, PRIBIO);
continue;
}
bp->b_flags |= B_BUSY | B_CACHE;
bremfree(bp);
/* NOTE(review): undersized cached buffer is unhandled */
if (size > bp->b_bufsize)
panic("now what do we do?");
} else {
/* miss: build a fresh buffer (getnewbuf may sleep; retry) */
if((bp = getnewbuf(size)) == 0) continue;
bp->b_blkno = bp->b_lblkno = blkno;
bgetvp(vp, bp);
x = splbio();
bh = BUFHASH(vp, blkno);
binshash(bp, bh);
bp->b_flags = B_BUSY;
}
splx(x);
return (bp);
}
}
/* Get an empty, disassociated buffer of given size. */
/* Get an empty, disassociated buffer of given size. Loops until
 * getnewbuf() succeeds (it returns 0 after sleeping for space). */
struct buf *
geteblk(size)
int size;
{
	register struct buf *bp;
	int s;

	do
		bp = getnewbuf(size);
	while (bp == 0);

	/* park it on a hash chain so a later bremhash() is legal */
	s = splbio();
	binshash(bp, bfreelist + BQ_AGE);
	splx(s);
	return (bp);
}
/* Exchange a buffer's underlying buffer storage for one of different size,
* taking care to maintain contents appropriately. When buffer increases in
* size, caller is responsible for filling out additional contents. When buffer
* shrinks in size, data is lost, so caller must first return it to backing
* store before shrinking the buffer, as no implied I/O will be done.
* Expanded buffer is returned as value. */
/* Exchange a buffer's underlying buffer storage for one of different size,
 * taking care to maintain contents appropriately. When buffer increases in
 * size, caller is responsible for filling out additional contents. When buffer
 * shrinks in size, data is lost, so caller must first return it to backing
 * store before shrinking the buffer, as no implied I/O will be done.
 * Expanded buffer is returned as value. */
struct buf *
allocbuf(bp, size)
register struct buf *bp;
int size;
{
	caddr_t fresh;

	/* obtain replacement storage and preserve whatever fits */
	fresh = (caddr_t) malloc (size, M_TEMP, M_WAITOK);
	bcopy (bp->b_un.b_addr, fresh, min(bp->b_bufsize, size));
	free (bp->b_un.b_addr, M_TEMP);

	/* account for the net change in allocated buffer memory */
	freebufspace -= size - bp->b_bufsize;
	allocbufspace += size - bp->b_bufsize;

	/* install the new storage in the header */
	bp->b_un.b_addr = fresh;
	bp->b_bcount = bp->b_bufsize = size;
	return(bp);
}
/* Patiently await operations to complete on this buffer. When they do,
* extract error value and return it. Extract and return any errors associated
* with the I/O. If an invalid block, force it off the lookup hash chains. */
biowait(bp)
register struct buf *bp;
{
int x;
x = splbio();
while ((bp->b_flags & B_DONE) == 0)
sleep((caddr_t)bp, PRIBIO);
if((bp->b_flags & B_ERROR) || bp->b_error) {
if ((bp->b_flags & B_INVAL) == 0) {
bp->b_flags |= B_INVAL;
bremhash(bp);
binshash(bp, bfreelist + BQ_AGE);
}
if (!bp->b_error)
bp->b_error = EIO;
else
bp->b_flags |= B_ERROR;
splx(x);
return (bp->b_error);
} else {
splx(x);
return (0);
}
}
/* Finish up operations on a buffer, calling an optional function (if
* requested), and releasing the buffer if marked asynchronous. Then mark this
* buffer done so that others biowait()'ing for it will notice when they are
* woken up from sleep(). */
/* Finish up operations on a buffer, calling an optional function (if
 * requested), and releasing the buffer if marked asynchronous. Then mark
 * this buffer done so that others biowait()'ing for it will notice when
 * they are woken up from sleep(). */
biodone(bp)
register struct buf *bp;
{
	int s;

	s = splbio();
	/* completion callback, if one was requested */
	if (bp->b_flags & B_CALL)
		(*bp->b_iodone)(bp);
	bp->b_flags &= ~B_CALL;
	/* async buffers release themselves on completion */
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	bp->b_flags &= ~B_ASYNC;
	bp->b_flags |= B_DONE;
	wakeup(bp);	/* rouse any biowait()ers */
	splx(s);
}
[LISTING THREE]
/* Copyright (c) 1992 William F. Jolitz. All rights reserved.
* Written by William Jolitz 1/92
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met: 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This software is a component of "386BSD" developed by
*	William F. Jolitz, TeleMuse.
* 4. Neither the name of the developer nor the name "386BSD"
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS A COMPONENT OF 386BSD DEVELOPED BY WILLIAM F. JOLITZ
* AND IS INTENDED FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY. THIS SOFTWARE
* SHOULD NOT BE CONSIDERED TO BE A COMMERCIAL PRODUCT. THE DEVELOPER URGES
* THAT USERS WHO REQUIRE A COMMERCIAL PRODUCT NOT MAKE USE OF THIS WORK. THIS
* SOFTWARE IS PROVIDED BY THE DEVELOPER ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE DEVELOPER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Ring Buffer code for 386BSD. */
#include "param.h"
#include "systm.h"
#include "buf.h"
#include "ioctl.h"
#include "tty.h"
/* Append character c at the tail of the ring. Returns 0 on success,
 * -1 if the ring is full (one slot is always left unused). */
putc(c, rbp) struct ringb *rbp;
{
	register char *succ;

	/* full when advancing the tail would collide with the head */
	succ = RB_SUCC(rbp, rbp->rb_tl);
	if (succ == rbp->rb_hd)
		return (-1);

	*rbp->rb_tl = c;
	rbp->rb_tl = succ;
	return (0);
}
getc(rbp) struct ringb *rbp;
{
u_char c;
/* ring buffer empty? */
if (rbp->rb_hd == rbp->rb_tl) return(-1);
/* fetch character, locate next character */
c = *(u_char *) rbp->rb_hd;
rbp->rb_hd = RB_SUCC(rbp, rbp->rb_hd);
return (c);
}
/* Non-destructive scan: return the character at *cpp and advance *cpp
 * around the ring, without moving the head. Returns 0 at the tail. */
nextc(cpp, rbp) struct ringb *rbp; char **cpp;
{
	register char *cur;

	/* cursor has reached the tail: nothing further to scan */
	if (*cpp == rbp->rb_tl)
		return (0);

	cur = *cpp;
	*cpp = RB_SUCC(rbp, cur);
	return (*cur);
}
/* Push character c back onto the head of the ring. Returns 0 on
 * success, -1 if the ring is full. */
ungetc(c, rbp) struct ringb *rbp;
{
	register char *prev;

	/* full when backing the head up would collide with the tail */
	prev = RB_PRED(rbp, rbp->rb_hd);
	if (prev == rbp->rb_tl)
		return (-1);

	rbp->rb_hd = prev;
	*prev = c;
	return (0);
}
/* Remove and return the most recently put character (from the tail),
 * or -1 if the ring is empty. */
unputc(rbp) struct ringb *rbp;
{
	register char *prev;
	int ch;

	/* nothing to take back from an empty ring */
	if (rbp->rb_hd == rbp->rb_tl)
		return (-1);

	/* step the tail back and recover the character stored there */
	prev = RB_PRED(rbp, rbp->rb_tl);
	ch = *(u_char *)prev;
	rbp->rb_tl = prev;
	return (ch);
}
/* Return the character at the head without consuming it.
 * NOTE(review): unlike getc(), no empty check -- caller must first
 * ensure the ring is non-empty. */
#define peekc(rbp) (*(rbp)->rb_hd)
/* Reset a ring buffer to the empty state (head == tail == start). */
initrb(rbp) struct ringb *rbp; {
rbp->rb_hd = rbp->rb_tl = rbp->rb_buf;
}
/* Example code for contiguous operations:
...
nc = RB_CONTIGPUT(&rb);
if (nc) {
if (nc > 9) nc = 9;
bcopy("ABCDEFGHI", rb.rb_tl, nc);
rb.rb_tl += nc;
rb.rb_tl = RB_ROLLOVER(&rb, rb.rb_tl);
}
...
...
nc = RB_CONTIGGET(&rb);
if (nc) {
if (nc > 79) nc = 79;
bcopy(rb.rb_hd, stringbuf, nc);
rb.rb_hd += nc;
rb.rb_hd = RB_ROLLOVER(&rb, rb.rb_hd);
stringbuf[nc] = 0;
printf("%s|", stringbuf);
}
...
*/
/* Concatenate ring buffers: drain `from' into `to'. Characters that do
 * not fit in `to' are silently dropped (putc's -1 is ignored). */
catb(from, to)
struct ringb *from, *to;
{
	int c;

	/* FIX: use int, not char, for getc()'s return value. With a
	 * char, on unsigned-char machines (char)-1 >= 0 is always true
	 * and the loop never terminated; on signed-char machines any
	 * 0xff data byte ended the copy early. */
	while ((c = getc(from)) >= 0)
		putc(c, to);
}
[LISTING FOUR]
/* [Excerpted from tty.h, 386BSD Release 0.0 - wfj] */
/* Ring buffers provide a contiguous, dense storage for character data used
* by the tty driver. */
#define RBSZ 1024	/* ring capacity; one slot is kept unused, so RBSZ-1 chars fit */
struct ringb {
char *rb_hd; /* head of buffer segment to be read */
char *rb_tl; /* tail of buffer segment to be written */
char rb_buf[RBSZ]; /* segment contents */
};
/* advance p one slot, wrapping from the last slot back to the start */
#define RB_SUCC(rbp, p) \
((p) >= (rbp)->rb_buf + RBSZ - 1 ? (rbp)->rb_buf : (p) + 1)
/* fold a pointer that ran exactly one past the end back to the start */
#define RB_ROLLOVER(rbp, p) \
((p) > (rbp)->rb_buf + RBSZ - 1 ? (rbp)->rb_buf : (p))
/* back p up one slot, wrapping from the start to the last slot */
#define RB_PRED(rbp, p) \
((p) <= (rbp)->rb_buf ? (rbp)->rb_buf + RBSZ - 1 : (p) - 1)
/* number of characters currently stored in the ring */
#define RB_LEN(rp) \
((rp)->rb_hd <= (rp)->rb_tl ? (rp)->rb_tl - (rp)->rb_hd : \
RBSZ - ((rp)->rb_hd - (rp)->rb_tl))
/* largest contiguous run that may be written at rb_tl without wrapping */
#define RB_CONTIGPUT(rp) \
(RB_PRED(rp, (rp)->rb_hd) < (rp)->rb_tl ? \
(rp)->rb_buf + RBSZ - (rp)->rb_tl : \
RB_PRED(rp, (rp)->rb_hd) - (rp)->rb_tl)
/* largest contiguous run that may be read at rb_hd without wrapping */
#define RB_CONTIGGET(rp) \
((rp)->rb_hd <= (rp)->rb_tl ? (rp)->rb_tl - (rp)->rb_hd : \
(rp)->rb_buf + RBSZ - (rp)->rb_hd)